In [1]:
import plotly.graph_objects as go
from plotly.subplots import make_subplots

import numpy as np
import copy
from IPython.display import display, HTML, Markdown
import scipy.signal

def image_scale():
    """Base pixel dimension used to size every figure in this notebook."""
    return 180

def grayscale_heatmap(img):
    """Build a plotly Heatmap trace that renders ``img`` in grayscale.

    The color scale runs from black (0) to white (1); the colorbar and
    hover tooltips are disabled so the trace reads as a plain image
    inside a subplot grid.
    """
    black_to_white = [[0, 'rgb(0,0,0)'], [1, 'rgb(255,255,255)']]
    return go.Heatmap(
        z=img,
        colorscale=black_to_white,
        showscale=False,
        hoverinfo="skip",
    )

def kernel_normalize(kernel):
    """Return a copy of a 2D kernel scaled so its coefficients sum to 1.

    Unlike the previous in-place version, this does not mutate the
    caller's list — literal kernels passed in remain usable afterwards.

    Args:
        kernel: 2D list of numeric weights.

    Returns:
        A new 2D list of floats with the same shape, summing to 1.

    Raises:
        ZeroDivisionError: if the coefficients sum to zero.
    """
    total = sum(value for row in kernel for value in row)
    return [[value / total for value in row] for row in kernel]

def kernel_proposed():
    """3x3 'proposed' blur kernel: like the github kernel but with a
    slightly reduced center weight (7.8 vs 8), normalized to unit sum."""
    weights = [
        [2, 4, 2],
        [4, 7.8, 4],
        [2, 4, 2],
    ]
    return kernel_normalize(weights)

def kernel_proposed6():
    """Variant of kernel_proposed with a milder center-weight reduction
    (7.96 vs 8), intended for 6-iteration schedules; normalized."""
    weights = [
        [2, 4, 2],
        [4, 7.96, 4],
        [2, 4, 2],
    ]
    return kernel_normalize(weights)

def kernel_github():
    """Separable 3x3 binomial kernel — the outer product of
    (1/4, 1/2, 1/4) — as used in ZheyuanXie/CUDA-Path-Tracer-Denoising."""
    row = [1 / 4, 1 / 2, 1 / 4]
    return [[a * b for b in row] for a in row]

def kernel_paper():
    """5x5 B3-spline kernel from F. Murtagh's multiscale transform work:
    the outer product of the 1D kernel (1/16, 1/4, 3/8, 1/4, 1/16)."""
    row = [1 / 16, 1 / 4, 3 / 8, 1 / 4, 1 / 16]
    return [[a * b for b in row] for a in row]
    
def kernel_paper_cropped():
    """Central 3x3 portion of kernel_paper(), re-normalized to unit sum."""
    center = [
        [1 / 16, 3 / 32, 1 / 16],
        [3 / 32, 9 / 64, 3 / 32],
        [1 / 16, 3 / 32, 1 / 16],
    ]
    return kernel_normalize(center)

def h_convolution(img, scale, base_kernel, mode="full"):
    """Convolve ``img`` with an a-trous (dilated) version of ``base_kernel``.

    The base kernel's taps are spread ``scale`` pixels apart with zeros
    inserted in between ("algorithme a trous" / hierarchical convolution).
    With ``scale == 1`` the base kernel is used as-is.

    Args:
        img: 2D array-like image.
        scale: positive integer dilation factor between kernel taps.
        base_kernel: rectangular 2D list of kernel weights (ragged kernels
            were never valid here — convolve2d requires a rectangular array).
        mode: passed through to scipy.signal.convolve2d ("full", "same", ...).

    Returns:
        2D numpy array holding the convolution result.
    """
    rows = scale * (len(base_kernel) - 1) + 1
    cols = scale * (len(base_kernel[0]) - 1) + 1
    kernel = np.zeros((rows, cols))
    # Place each base tap on the dilated grid; everything else stays zero.
    for y, row in enumerate(base_kernel):
        for x, value in enumerate(row):
            kernel[y * scale][x * scale] = value

    return scipy.signal.convolve2d(img, kernel, mode=mode)

def texture_center_dot(size=1):
    """Return a size x size float image of zeros with a single 1.0 impulse
    at the center pixel (index ``size // 2`` on both axes).

    Args:
        size: side length in pixels; the default of 1 gives a single
            lit pixel, used as the seed for "full"-mode convolutions.

    Returns:
        (size, size) numpy array: all zeros except the centered impulse.
    """
    # np.zeros is already all-zero — the old element-wise zeroing loop
    # was redundant and has been removed.
    img = np.zeros((size, size))
    img[size // 2][size // 2] = 1
    return img
In [2]:
# NOTE(review): these two assignments are dead — img and kernel_scale are
# re-initialized below (after the first make_subplots) before being used.
img = texture_center_dot()
kernel_scale = 1

display(Markdown("## From small to big kernel"))

# One subplot column per blur iteration.
fig = make_subplots(
    rows=1, cols=4,
    vertical_spacing=0.01,
    horizontal_spacing=0.02,
)

fig.update_layout(
    margin = dict(t=8, r=8, b=8, l=8),
    width = image_scale()*6, height = image_scale()*1.5,
    autosize = False,
)

# Start from a 31x31 impulse and repeatedly blur with the github kernel,
# doubling the a-trous dilation each iteration (smallest kernel first).
img = texture_center_dot(31)
kernel_scale = 1

for k in range(4):
    # mode="same" keeps the image at 31x31 for every panel.
    img = h_convolution(img, kernel_scale, kernel_github(), mode="same")
    fig.add_trace(
        grayscale_heatmap(img),
        row=1, col=k+1,
    )
    kernel_scale = int(kernel_scale * 2)

fig.update_xaxes(showticklabels = False, constrain="domain")
fig.update_yaxes(showticklabels = False, constrain="domain")

# Anchor x to y per subplot so heatmap pixels render square.
fig.layout.xaxis1.scaleanchor = "y1"
fig.layout.xaxis2.scaleanchor = "y2"
fig.layout.xaxis3.scaleanchor = "y3"
fig.layout.xaxis4.scaleanchor = "y4"

fig.show()

display(Markdown("## From big to small kernel"))


fig = make_subplots(
    rows=1, cols=4,
    vertical_spacing=0.01,
    horizontal_spacing=0.02,
)

fig.update_layout(
    margin = dict(t=8, r=8, b=8, l=8),
    width = image_scale()*6, height = image_scale()*1.5,
    autosize = False,
)

# Start from a 1x1 impulse; the default mode="full" lets each convolution
# grow the image by the dilated kernel size. Dilation starts large (8)
# and halves every iteration.
img = texture_center_dot()
kernel_scale = 8

for k in range(4):
    img = h_convolution(img, kernel_scale, kernel_github())
    fig.add_trace(
        grayscale_heatmap(img),
        row=1, col=k+1,
    )
    kernel_scale = int(kernel_scale * 0.5)

fig.update_xaxes(showticklabels = False, constrain="domain")
fig.update_yaxes(showticklabels = False, constrain="domain")

fig.layout.xaxis1.scaleanchor = "y1"
fig.layout.xaxis2.scaleanchor = "y2"
fig.layout.xaxis3.scaleanchor = "y3"
fig.layout.xaxis4.scaleanchor = "y4"

fig.show()

From small to big kernel¶

From big to small kernel¶

In [3]:
# Markdown table comparing sample cost / reach of the candidate kernels.
display(Markdown(r"""
## Kernel comparison

|Kernel|Samples|Iterations|Total samples|Equivalent full kernel size|Relative efficiency|
|----|:----:|:----:|:----:|:----:|:----:|
|Proposed|9|4|36|17|8.00|
|Github - https://github.com/ZheyuanXie/CUDA-Path-Tracer-Denoising|9|4|36|17|8.00|
|F. Murtagh. Multiscale Transform Methods in Data Analysis<br>1D: ($\frac{1}{16}, \frac{1}{4}, \frac{3}{8}, \frac{1}{4}, \frac{1}{16}$)|25|3|75|13|2.25|
|F. Murtagh. Multiscale Transform Methods in Data Analysis - cropped to 3x3 and normalized<br>1D: ($\frac{1}{4}, \frac{3}{8}, \frac{1}{4}$) -> normalize|9|4|36|17|8.00|

<br>

"""))

fig = make_subplots(
    rows=1, cols=4,
    vertical_spacing=0.01,
    horizontal_spacing=0.02,
    subplot_titles=("Proposed", "Github", "F. Murtagh", "F. Murtagh<br>(cropped & normalized)"),
)

fig.update_layout(
    margin = dict(t=48, r=8, b=8, l=8),
    width = image_scale()*6,
    height = image_scale()*1.8,
    autosize = False,
)

# Panel 1: proposed kernel, 4 iterations, growing ("full") image.
img = texture_center_dot()
kernel_scale = 1
for k in range(4):
    img = h_convolution(img, kernel_scale, kernel_proposed())
    kernel_scale = int(kernel_scale * 2.0)
fig.add_trace(
    grayscale_heatmap(img),
    row=1, col=1,
)

# Panel 2: github kernel, 4 iterations.
img = texture_center_dot()
kernel_scale = 1
for k in range(4):
    img = h_convolution(img, kernel_scale, kernel_github())
    kernel_scale = int(kernel_scale * 2.0)
fig.add_trace(
    grayscale_heatmap(img),
    row=1, col=2,
)

# Panel 3: 5x5 paper kernel, only 3 iterations. The image is pre-sized to
# the previous panel's result via len(img), and mode="same" keeps that
# size, so all panels stay visually comparable.
img = texture_center_dot(len(img))
kernel_scale = 1
for k in range(3):
    img = h_convolution(img, kernel_scale, kernel_paper(), mode="same")
    kernel_scale = int(kernel_scale * 2.0)
fig.add_trace(
    grayscale_heatmap(img),
    row=1, col=3,
)

# Panel 4: cropped & renormalized paper kernel, 4 iterations.
img = texture_center_dot()
kernel_scale = 1
for k in range(4):
    img = h_convolution(img, kernel_scale, kernel_paper_cropped())
    kernel_scale = int(kernel_scale * 2.0)
fig.add_trace(
    grayscale_heatmap(img),
    row=1, col=4,
)

fig.update_xaxes(showticklabels = False, constrain="domain")
fig.update_yaxes(showticklabels = False, constrain="domain")

# Square pixels in every panel.
fig.layout.xaxis1.scaleanchor = "y1"
fig.layout.xaxis2.scaleanchor = "y2"
fig.layout.xaxis3.scaleanchor = "y3"
fig.layout.xaxis4.scaleanchor = "y4"

fig.show()

Kernel comparison¶

Kernel Samples Iterations Total samples Equivalent full kernel size Relative efficiency
Proposed 9 4 36 17 8.00
Github - https://github.com/ZheyuanXie/CUDA-Path-Tracer-Denoising 9 4 36 17 8.00
F. Murtagh. Multiscale Transform Methods in Data Analysis
1D: ($\frac{1}{16}, \frac{1}{4}, \frac{3}{8}, \frac{1}{4}, \frac{1}{16}$)
25 3 75 13 2.25
F. Murtagh. Multiscale Transform Methods in Data Analysis - cropped to 3x3 and normalized
1D: ($\frac{1}{4}, \frac{3}{8}, \frac{1}{4}$) -> normalize
9 4 36 17 8.00


In [4]:
display(Markdown(r"""

## Artifact comparison

"""))

# 2x2 grid: github vs proposed kernel, at 6 and at 4 iterations.
fig = make_subplots(
    rows=2, cols=2,
    vertical_spacing=0.05,
    horizontal_spacing=0.02,
    subplot_titles=("Github kernel - 6 iterations", "Proposed kernel - 6 iterations", "Github kernel - 4 iterations", "Proposed kernel \n 4 iterations"),
)

fig.update_layout(
    margin = dict(t=48, r=8, b=8, l=8),
    width = image_scale()*4, height = image_scale()*4,
    autosize = False
)

# Github kernel, 6 iterations (dilation doubles each step).
img = texture_center_dot()
kernel_scale = 1

for k in range(6):
    img = h_convolution(img, kernel_scale, kernel_github())
    kernel_scale = int(kernel_scale * 2)

fig.add_trace(
    grayscale_heatmap(img),
    row=1, col=1,
)

# Proposed kernel, 6 iterations.
img = texture_center_dot()
kernel_scale = 1

for k in range(6):
    img = h_convolution(img, kernel_scale, kernel_proposed())
    kernel_scale = int(kernel_scale * 2)

fig.add_trace(
    grayscale_heatmap(img),
    row=1, col=2,
)

# Github kernel, 4 iterations.
img = texture_center_dot()
kernel_scale = 1

for k in range(4):
    img = h_convolution(img, kernel_scale, kernel_github())
    kernel_scale = int(kernel_scale * 2)

fig.add_trace(
    grayscale_heatmap(img),
    row=2, col=1,
)

# Proposed kernel, 4 iterations.
img = texture_center_dot()
kernel_scale = 1

for k in range(4):
    img = h_convolution(img, kernel_scale, kernel_proposed())
    kernel_scale = int(kernel_scale * 2)

fig.add_trace(
    grayscale_heatmap(img),
    row=2, col=2,
)

fig.update_xaxes(showticklabels = False, constrain="domain")
fig.update_yaxes(showticklabels = False, constrain="domain")

fig.layout.xaxis1.scaleanchor = "y1"
fig.layout.xaxis2.scaleanchor = "y2"
fig.layout.xaxis3.scaleanchor = "y3"
fig.layout.xaxis4.scaleanchor = "y4"

fig.show()

display(Markdown(r"""

### Github kernel

https://github.com/ZheyuanXie/CUDA-Path-Tracer-Denoising<br>
<img src="https://raw.githubusercontent.com/ZheyuanXie/CUDA-Path-Tracer-Denoising/master/img/atrous_kernel.png" width="600">

**Proposed** kernel has slightly reduced center coefficient.<br>
It reduces the bright cross on 4 iterstions without significant artifacts,<br>
but creates regular artifacts on 6 iterations.

It appears as the number of iterations increases, the kernel becomes more sensitive to alterations

"""))

# 2x3 grid: side-by-side runs plus |difference| images in column 3.
fig = make_subplots(
    rows=2, cols=3,
    vertical_spacing=0.1,
    horizontal_spacing=0.02,
    subplot_titles=(
        "Github kernel <br> 6 iterations", "Proposed kernel <br> 6 iterations <br> less center weight reduction", "Difference",
        "Github kernel <br> 4 iterations", "Proposed kernel <br> 4 iterations", "Difference"
    ),
)

fig.update_layout(
    margin = dict(t=68, r=8, b=8, l=8),
    width = image_scale()*5, height = image_scale()*4,
    autosize = False
)

# Row 1: github vs kernel_proposed6 (milder center reduction), 6 iterations.
img = texture_center_dot()
kernel_scale = 1

for k in range(6):
    img = h_convolution(img, kernel_scale, kernel_github())
    kernel_scale = int(kernel_scale * 2)

fig.add_trace(
    grayscale_heatmap(img),
    row=1, col=1,
)

img2 = texture_center_dot()
kernel_scale = 1

for k in range(6):
    img2 = h_convolution(img2, kernel_scale, kernel_proposed6())
    kernel_scale = int(kernel_scale * 2)

fig.add_trace(
    grayscale_heatmap(img2),
    row=1, col=2,
)

# Absolute per-pixel difference between the two 6-iteration results.
img = np.absolute(np.subtract(img, img2))

fig.add_trace(
    grayscale_heatmap(img),
    row=1, col=3,
)

# Row 2: github vs kernel_proposed, 4 iterations.
img = texture_center_dot()
kernel_scale = 1

for k in range(4):
    img = h_convolution(img, kernel_scale, kernel_github())
    kernel_scale = int(kernel_scale * 2)

fig.add_trace(
    grayscale_heatmap(img),
    row=2, col=1,
)

img2 = texture_center_dot()
kernel_scale = 1

for k in range(4):
    img2 = h_convolution(img2, kernel_scale, kernel_proposed())
    kernel_scale = int(kernel_scale * 2)

fig.add_trace(
    grayscale_heatmap(img2),
    row=2, col=2,
)

# Absolute per-pixel difference between the two 4-iteration results.
img = np.absolute(np.subtract(img, img2))

fig.add_trace(
    grayscale_heatmap(img),
    row=2, col=3,
)

fig.update_xaxes(showticklabels = False, constrain="domain")
fig.update_yaxes(showticklabels = False, constrain="domain")

fig.layout.xaxis1.scaleanchor = "y1"
fig.layout.xaxis2.scaleanchor = "y2"
fig.layout.xaxis3.scaleanchor = "y3"
fig.layout.xaxis4.scaleanchor = "y4"
fig.layout.xaxis5.scaleanchor = "y5"
fig.layout.xaxis6.scaleanchor = "y6"

fig.show()

display(Markdown(r"""

With 6 iterations reducing center weight can not significantly reduce the bright cross without creating artifacts.<br>
It appears kernel alterations are only feasible for small number of iterations (less than 6)

"""))

Artifact comparison¶

Github kernel¶

https://github.com/ZheyuanXie/CUDA-Path-Tracer-Denoising

Proposed kernel has slightly reduced center coefficient.
It reduces the bright cross on 4 iterations without significant artifacts,
but creates regular artifacts on 6 iterations.

It appears as the number of iterations increases, the kernel becomes more sensitive to alterations

With 6 iterations reducing center weight can not significantly reduce the bright cross without creating artifacts.
It appears kernel alterations are only feasible for small number of iterations (less than 6)

In [5]:
# Section: 1D convergence tests for the factorized (separable) kernels.
display(Markdown(r"""

## 1D convergence test

"""))

def convolution_1D(arr, kernel):
    """Circular (wrap-around) 1D convolution of ``arr`` with ``kernel``.

    The kernel is centered on each output position; indices that fall off
    either end wrap around, so the output has the same length as the
    input and total mass is preserved for a normalized kernel.
    """
    half = int(len(kernel) / 2)
    result = []

    for center in range(len(arr)):
        acc = 0
        for offset, weight in enumerate(kernel):
            idx = (center + offset - half + len(arr)) % len(arr)
            acc += arr[idx] * weight
        result.append(acc)

    return result

def demo_kernel(base_kernel, color, iterations=5, name="", start=0, arr=None, big_to_small=False, total_iterations=0):
    """Plot successive 1D a-trous convolutions of a signal into the global ``fig``.

    Each iteration draws the current signal into subplot column
    ``iteration + 1 + start`` and then convolves it with ``base_kernel``
    dilated by the current gap. The gap doubles each iteration (or starts
    large and halves when ``big_to_small`` is set).

    Args:
        base_kernel: 1D list of kernel weights.
        color: plotly line color shared by all traces of this run.
        iterations: number of convolution steps to draw.
        name: legend label; shown only on the very first trace of a run.
        start: subplot column offset, used to chain two kernel schedules.
        arr: starting signal; defaults to a 61-sample impulse at index 30.
        big_to_small: if True, dilation shrinks instead of growing.
        total_iterations: overall schedule length; sizes the initial gap
            in big_to_small mode.

    Returns:
        The signal after the final convolution, so runs can be chained.
    """
    # BUGFIX: was ``arr == None`` — for a list that happens to be False,
    # but it is the wrong check and breaks if a numpy array is ever
    # passed; ``is None`` is the correct identity test.
    if arr is None:
        arr = []
        for i in range(61):
            arr.append(0)

        arr[30] = 1

    # Initial tap spacing for this run's position in the schedule.
    gap = 1
    if big_to_small:
        for i in range(total_iterations - start - 2):
            gap *= 2
    else:
        for i in range(start):
            gap *= 2

    for iteration in range(iterations):
        fig.add_trace(
            go.Scatter(
                x=list(range(len(arr))),
                y=arr,
                mode='lines',
                name=name,
                showlegend = True if iteration == 0 and start == 0 else False,
                line_color=color,
            ),
            row = 1,
            col = iteration + 1 + start,
        )

        # Dilate the base kernel: insert (gap - 1) zeros between taps.
        kernel = []
        for base_kernel_index in range(len(base_kernel) - 1):
            kernel.append(base_kernel[base_kernel_index])
            for _ in range(gap - 1):
                kernel.append(0)
        kernel.append(base_kernel[-1])
        gap = int(gap * (0.5 if big_to_small else 2))

        arr = convolution_1D(arr, kernel)

    return arr
  
# Legend / reference table for the kernels compared below.
display(Markdown(r"""

|||
|----|----|
|F. Murtagh|F. Murtagh. Multiscale Transform Methods in Data Analysis<br>($\frac{1}{16}, \frac{1}{4}, \frac{3}{8}, \frac{1}{4}, \frac{1}{16}$)|
|F. Murtagh (cropped and normalized)|($\frac{1}{4}, \frac{3}{8}, \frac{1}{4}$) -> normalize -> (0.28, 0.42, 0.28)|
|Github|<br>https://github.com/ZheyuanXie/CUDA-Path-Tracer-Denoising<br>$(\frac{1}{4}, \frac{1}{2}, \frac{1}{4})$|

"""))

fig = make_subplots(
    rows=1, cols=5,
    vertical_spacing=0.1,
    horizontal_spacing=0.02,
)

fig.update_layout(
    margin = dict(t=8, r=8, b=8, l=8),
    width = image_scale()*7, height = image_scale()*1.5,
    autosize = False,
    legend_traceorder="reversed",
)


# Normalize the cropped 3-tap F. Murtagh kernel to unit sum, then overlay
# all three kernels' iteration-by-iteration profiles in the same subplots.
cropped = [1/4,3/8,1/4]
cropped_sum = sum(cropped)
for i in range(len(cropped)):
    cropped[i] /= cropped_sum
demo_kernel(cropped, color="goldenrod", name="F. Murtagh (cropped)")
demo_kernel([1/16,1/4,3/8,1/4,1/16], color="mediumpurple", name="F. Murtagh")
demo_kernel([1/4,1/2,1/4], color="green", name="Github")

# Leftover matplotlib equivalents, kept for reference:
#fig.legend(["F. Murtagh", "F. Murtagh (cropped)", "Github"], loc="upper left", fontsize=16)
#plt.show()

fig.show()

display(Markdown(r"""

Cropped to 3 samples F. Murtagh kernel does not converge at all, even though it is quite close to github kernel.<br>
Github kernel converges perfectly to a triangle.<br><br>

"""))

fig = make_subplots(
    rows=1, cols=6,
    vertical_spacing=0.1,
    horizontal_spacing=0.02,
)

fig.update_layout(
    margin = dict(t=8, r=8, b=8, l=8),
    width = image_scale()*7, height = image_scale()*1.5,
    autosize = False,
    legend_traceorder="reversed",
)

# 1D factorization of the proposed 2D kernel, 6 iterations.
demo_kernel([0.253,0.494,0.253], color="mediumpurple", iterations=6, name="Proposed")
fig.show()

display(Markdown(r"""

Factorized into 1D, proposed kernel does not converge smoothly.<br>
But in 2D it gets rid of the bright cross at the cost of marginally less smooth distance fading.

It appears constructing a good 2D kernel is more complex than just multiplying row-vector and column-vector of a good 1D kernel.

**Edit:**

Proposed kernel will only converge in 2D with small number of iterations (less than 6),<br>
with 6+iterations it generates artifacts.

It appears 2D convergence and 1D convergence are connected, but on small number of iterations 2D convergence is possible without 1D convergence.

Also, the theory of the kernel becoming more sensitive to changes with increased iteration count is confirmed

**Idea**: use modified kernel on first iterations, then switch to non-modified. Is it better to start with large of small kernel?

"""))

fig = make_subplots(
    rows=1, cols=6,
    vertical_spacing=0.1,
    horizontal_spacing=0.02,
)

fig.update_layout(
    margin = dict(t=8, r=8, b=8, l=8),
    width = image_scale()*7, height = image_scale()*1.5,
    autosize = False,
    legend_traceorder="reversed",
)

# Mixed schedule, small-to-big: 4 x proposed then 2 x github.
# demo_kernel returns the signal so the two runs can be chained.
arr = demo_kernel([0.253,0.494,0.253], color="mediumpurple", iterations=4, name="4 x Proposed + 2 x Github<br>small to big")
demo_kernel([1/4,1/2,1/4], color="mediumpurple", iterations=2,start=4, arr=arr)
fig.show()

fig = make_subplots(
    rows=1, cols=6,
    vertical_spacing=0.1,
    horizontal_spacing=0.02,
)

fig.update_layout(
    margin = dict(t=8, r=8, b=8, l=8),
    width = image_scale()*7, height = image_scale()*1.5,
    autosize = False,
    legend_traceorder="reversed",
)

# Same mixed schedule but big-to-small (dilation shrinks each step).
arr = demo_kernel([0.253,0.494,0.253], color="green", iterations=4, name="4 x Proposed + 2 x Github<br>big to small", big_to_small=True, total_iterations=6)
demo_kernel([1/4,1/2,1/4], color="green", iterations=2, start=4, arr=arr, big_to_small=True, total_iterations=6)
fig.show()

display(Markdown(r"""

Smooth convergence is achieved when going big to small kernel, using modified kernel on first 3 (big) iterations

**Question**: will it converge when going small to big and using modified kernel on **last** (big) iterations?

"""))

fig = make_subplots(
    rows=1, cols=6,
    vertical_spacing=0.1,
    horizontal_spacing=0.02,
)

fig.update_layout(
    margin = dict(t=8, r=8, b=8, l=8),
    width = image_scale()*7, height = image_scale()*1.5,
    autosize = False,
    legend_traceorder="reversed",
)

# Reversed schedule: 2 x github first, then 4 x proposed.
arr = demo_kernel([1/4,1/2,1/4], color="mediumpurple", iterations=2, name="2 x Github + 4 x Proposed <br>small to big")
demo_kernel([0.253,0.494,0.253], color="mediumpurple", iterations=4, start=2, arr=arr)
fig.show()

display(Markdown(r"""

It does!

**Todo**: test in 2D

"""))

fig = make_subplots(
    rows=1, cols=5,
    vertical_spacing=0.1,
    horizontal_spacing=0.02,
)

fig.update_layout(
    margin = dict(t=8, r=8, b=8, l=8),
    width = image_scale()*7, height = image_scale()*1.5,
    autosize = False,
    legend_traceorder="reversed",
)

# Box (uniform) kernel as a negative control — it should not converge.
demo_kernel([1/3,1/3,1/3], name="(1/3, 1/3, 1/3)", color="mediumpurple")
fig.show()

display(Markdown(r"""

<br>
Kernel $(\frac{1}{3}, \frac{1}{3}, \frac{1}{3})$ does not converge. **Need to figure out the convergence condition**.

**ToDo**: make a demo comparing 6 iterations of 1D kernel with different center values

"""))

1D convergence test¶

F. Murtagh F. Murtagh. Multiscale Transform Methods in Data Analysis
($\frac{1}{16}, \frac{1}{4}, \frac{3}{8}, \frac{1}{4}, \frac{1}{16}$)
F. Murtagh (cropped and normalized) ($\frac{1}{4}, \frac{3}{8}, \frac{1}{4}$) -> normalize -> (0.28, 0.42, 0.28)
Github
https://github.com/ZheyuanXie/CUDA-Path-Tracer-Denoising
$(\frac{1}{4}, \frac{1}{2}, \frac{1}{4})$

Cropped to 3 samples F. Murtagh kernel does not converge at all, even though it is quite close to github kernel.
Github kernel converges perfectly to a triangle.

Factorized into 1D, proposed kernel does not converge smoothly.
But in 2D it gets rid of the bright cross at the cost of marginally less smooth distance fading.

It appears constructing a good 2D kernel is more complex than just multiplying row-vector and column-vector of a good 1D kernel.

Edit:

Proposed kernel will only converge in 2D with small number of iterations (less than 6),
with 6+iterations it generates artifacts.

It appears 2D convergence and 1D convergence are connected, but on small number of iterations 2D convergence is possible without 1D convergence.

Also, the theory of the kernel becoming more sensitive to changes with increased iteration count is confirmed

Idea: use modified kernel on first iterations, then switch to non-modified. Is it better to start with a large or small kernel?

Smooth convergence is achieved when going big to small kernel, using modified kernel on first 3 (big) iterations

Question: will it converge when going small to big and using modified kernel on last (big) iterations?

It does!

Todo: test in 2D


Kernel $(\frac{1}{3}, \frac{1}{3}, \frac{1}{3})$ does not converge. Need to figure out the convergence condition.

ToDo: make a demo comparing 6 iterations of 1D kernel with different center values

In [6]:
display(Markdown(r"""

## Combination / compromise

"""))

# Comparison 1: pure github (6 iterations) vs 2 x github + 4 x proposed.
fig = make_subplots(
    rows=1, cols=3,
    vertical_spacing=0.01,
    horizontal_spacing=0.02,
    subplot_titles=("Github", "2 x Github + 4 x Proposed", "Difference"),
)

fig.update_layout(
    margin = dict(t=48, r=8, b=8, l=8),
    width = image_scale()*5,
    height = image_scale()*1.7,
    autosize = False,
)

img = texture_center_dot()
kernel_scale = 1
for k in range(6):
    img = h_convolution(img, kernel_scale, kernel_github())
    kernel_scale = int(kernel_scale * 2.0)
fig.add_trace(
    grayscale_heatmap(img),
    row=1, col=1,
)

# Mixed schedule: 2 github iterations, then 4 proposed.
img2 = texture_center_dot()
kernel_scale = 1
for k in range(2):
    img2 = h_convolution(img2, kernel_scale, kernel_github())
    kernel_scale = int(kernel_scale * 2.0)
for k in range(4):
    img2 = h_convolution(img2, kernel_scale, kernel_proposed())
    kernel_scale = int(kernel_scale * 2.0)
fig.add_trace(
    grayscale_heatmap(img2),
    row=1, col=2,
)

# Signed difference (not absolute, unlike the earlier cells).
img = (np.subtract(img, img2))
fig.add_trace(
    grayscale_heatmap(img),
    row=1, col=3,
)

fig.update_xaxes(showticklabels = False, constrain="domain")
fig.update_yaxes(showticklabels = False, constrain="domain")

fig.layout.xaxis1.scaleanchor = "y1"
fig.layout.xaxis2.scaleanchor = "y2"
fig.layout.xaxis3.scaleanchor = "y3"

fig.show()

# Comparison 2: github vs proposed6 (milder center reduction), 6 iterations.
fig = make_subplots(
    rows=1, cols=3,
    vertical_spacing=0.01,
    horizontal_spacing=0.02,
    subplot_titles=("Github", "Proposed with less center<br>weight reduction", "Difference"),
)

fig.update_layout(
    margin = dict(t=48, r=8, b=8, l=8),
    width = image_scale()*5,
    height = image_scale()*1.7,
    autosize = False,
)

img = texture_center_dot()
kernel_scale = 1
for k in range(6):
    img = h_convolution(img, kernel_scale, kernel_github())
    kernel_scale = int(kernel_scale * 2.0)
fig.add_trace(
    grayscale_heatmap(img),
    row=1, col=1,
)

img2 = texture_center_dot()
kernel_scale = 1
for k in range(6):
    img2 = h_convolution(img2, kernel_scale, kernel_proposed6())
    kernel_scale = int(kernel_scale * 2.0)
fig.add_trace(
    grayscale_heatmap(img2),
    row=1, col=2,
)


img = (np.subtract(img, img2))
fig.add_trace(
    grayscale_heatmap(img),
    row=1, col=3,
)

fig.update_xaxes(showticklabels = False, constrain="domain")
fig.update_yaxes(showticklabels = False, constrain="domain")

fig.layout.xaxis1.scaleanchor = "y1"
fig.layout.xaxis2.scaleanchor = "y2"
fig.layout.xaxis3.scaleanchor = "y3"

fig.show()

# Comparison 3: github vs 2 x proposed6 + 4 x proposed.
fig = make_subplots(
    rows=1, cols=3,
    vertical_spacing=0.01,
    horizontal_spacing=0.02,
    subplot_titles=("Github", "2 x Proposed with less center<br>weight reduction + 4 x Proposed", "Difference"),
)

fig.update_layout(
    margin = dict(t=48, r=8, b=8, l=8),
    width = image_scale()*5,
    height = image_scale()*1.7,
    autosize = False,
)

img = texture_center_dot()
kernel_scale = 1
for k in range(6):
    img = h_convolution(img, kernel_scale, kernel_github())
    kernel_scale = int(kernel_scale * 2.0)
fig.add_trace(
    grayscale_heatmap(img),
    row=1, col=1,
)

img2 = texture_center_dot()
kernel_scale = 1
for k in range(2):
    img2 = h_convolution(img2, kernel_scale, kernel_proposed6())
    kernel_scale = int(kernel_scale * 2.0)
for k in range(4):
    img2 = h_convolution(img2, kernel_scale, kernel_proposed())
    kernel_scale = int(kernel_scale * 2.0)
fig.add_trace(
    grayscale_heatmap(img2),
    row=1, col=2,
)


img = (np.subtract(img, img2))
fig.add_trace(
    grayscale_heatmap(img),
    row=1, col=3,
)

fig.update_xaxes(showticklabels = False, constrain="domain")
fig.update_yaxes(showticklabels = False, constrain="domain")

fig.layout.xaxis1.scaleanchor = "y1"
fig.layout.xaxis2.scaleanchor = "y2"
fig.layout.xaxis3.scaleanchor = "y3"

fig.show()

display(Markdown(r"""

* Using 2x Github + 4x Proposed does visibly reduce the bright cross, but the difference is small
* Using proposed with less center weight reduction has little to no visible effect
* Using proposed with less center weight reduction instead of github in combination with proposed does more harm than good (artifacts)

"""))

Combination / compromise¶

  • Using 2x Github + 4x Proposed does visibly reduce the bright cross, but the difference is small
  • Using proposed with less center weight reduction has little to no visible effect
  • Using proposed with less center weight reduction instead of github in combination with proposed does more harm than good (artifacts)
In [7]:
# Explain the center-coefficient sweep procedure before running it.
display(Markdown(r"""

1. Kernel = (1, center_coef, 1)
2. Normalize kernel (divide by sum)
3. Five convolutions with increasing sparse kernel size (padding with zeroes)

"""))

def test_1D_kernel(center_coef, scale):
    base_kernel = [1, center_coef, 1]
    s = sum(base_kernel)
    for i in range(len(base_kernel)):
        base_kernel[i] /= s
    kernel = []
    for i in range(scale*2+1):
        if i % scale == 0:
            kernel.append(base_kernel[int(i / scale)])
        else:
            kernel.append(0)
    return kernel


# 6x5 grid sweeping the center coefficient from 1.0 to 3.9 in 0.1 steps.
fig = make_subplots(
    rows=6, cols=5,
    vertical_spacing=0.05,
    horizontal_spacing=0.02,
    subplot_titles=[("center coef: " + str(round(2+i*0.1,1))) for i in range(-10, 20)],
)

fig.update_layout(
    margin = dict(t=48, r=8, b=8, l=8),
    width = image_scale()*4, height = image_scale()*4,
    autosize = False,
    legend_traceorder="reversed",
)

fig.update_xaxes(showticklabels = False)
fig.update_yaxes(showticklabels = False)

for i in range(-10, 20):
    center_coef = 2 + i*0.1;
    
    # Fresh 61-sample impulse for every sweep value.
    arr = [0 for x in range(61)]
    arr[30] = 1
    
    # Big-to-small schedule: dilation 16, 8, 4, 2, 1.
    kernel_scale = 16
    for k in range(5):
        kernel = test_1D_kernel(center_coef, kernel_scale)
        arr = convolution_1D(arr, kernel)
        kernel_scale = int(kernel_scale * 0.5)
    
    # i == 0 is center coef 2.0 — the normalized github kernel
    # (1/4, 1/2, 1/4) — drawn in red; all other traces are dark blue.
    fig.add_trace (
        go.Scatter(
            x=list(range(len(arr))),
            y=arr,
            mode='lines',
            showlegend = False,
            line_color="darkblue" if i else "red",
        ),
        row = int((i+10) / 5) + 1,
        col = (i+10) % 5 + 1,
    )    

fig.show()

display(Markdown(r"""

<br>
Only one (github) kernel converges: $(\frac{1}{4}, \frac{1}{2}, \frac{1}{4})$<br>
Moving away from this value steadily increases noise
<br><br>

**Question:** is there only one converging kernel for each given size, or larger sizes can have multiple different convering kernels?

"""))
  1. Kernel = (1, center_coef, 1)
  2. Normalize kernel (divide by sum)
  3. Five convolutions with increasing sparse kernel size (padding with zeroes)


Only one (github) kernel converges: $(\frac{1}{4}, \frac{1}{2}, \frac{1}{4})$
Moving away from this value steadily increases noise

Question: is there only one converging kernel for each given size, or can larger sizes have multiple different converging kernels?

In [8]:
display(Markdown(r"""

## Plotly

Reference (examples) for using plotly

"""))

# NOTE(review): this demo reuses `img` left over from earlier cells
# (hidden notebook state) — it fails on a fresh kernel if run alone.
fig = make_subplots(
    rows=2, cols=2,
    column_widths=[0.5, 0.5],
    row_heights=[0.5, 0.5],
    vertical_spacing=0.01,
    horizontal_spacing=0.01,
)

fig.add_trace(
    grayscale_heatmap(img),
    row=1, col=1,
)

fig.add_trace(
    grayscale_heatmap(img),
    row=1, col=2,
)

fig.add_trace(
    go.Scatter(
        x=list(range(10)),
        y=list(range(10)),
    ),
    row=2, col=2,
)

fig.update_layout(
    margin = dict(t=0,r=0,b=0,l=0),
    width = 700, height = 700,
    autosize = False,
)

# Example: rectangle shapes anchored to subplot 4's axes — a filled black
# background rectangle drawn below the trace, plus a red outline box.
fig.update_layout(
    shapes=[
        dict(type="rect", xref="x4", yref="y4",
            x0=0, y0=0, x1=10, y1=10,
             line = dict(
                 width=0,
             ),
             fillcolor="black",
             layer="below",
        ),
        dict(type="rect", xref="x4", yref="y4",
            x0=6, y0=3, x1=9, y1=7,
            line = dict(
                width = 3,
                color = "red",
            )
        ),
    ]
)

fig.update_xaxes(showticklabels = False)
fig.update_yaxes(showticklabels = False)

# Example: mirrored axis frame with tick labels on one subplot only.
fig.update_xaxes(
    showline=True, linewidth=2, linecolor='black', mirror=True, showticklabels=True,
    row=2, col=2
)

fig.update_yaxes(
    showline=True, linewidth=2, linecolor='black', mirror=True, showticklabels=True,
    row=2, col=2
)

fig.show()

Plotly¶

Reference (examples) for using plotly

In [ ]: